d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
#endif
+
+ shadow_lock_init(d);
}
}
reset_stack_and_jump(vmx_asm_do_launch);
}
-static void monitor_mk_pagetable(struct exec_domain *ed)
+static void alloc_monitor_pagetable(struct exec_domain *ed)
{
unsigned long mpfn;
l2_pgentry_t *mpl2e, *phys_table;
struct pfn_info *mpfn_info;
struct domain *d = ed->domain;
+ ASSERT(!ed->arch.monitor_table); /* we should only get called once */
+
mpfn_info = alloc_domheap_page(NULL);
ASSERT( mpfn_info );
HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
ed->arch.monitor_table = mk_pagetable(mpfn << PAGE_SHIFT);
- d->arch.shadow_mode = SHM_full_32;
mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
mk_l2_pgentry((__pa(d->arch.mm_perdomain_pt) & PAGE_MASK)
/*
* Free the pages for monitor_table and guest_pl2e_cache
*/
-static void monitor_rm_pagetable(struct exec_domain *ed)
+static void free_monitor_pagetable(struct exec_domain *ed)
{
l2_pgentry_t *mpl2e;
unsigned long mpfn;
goto out;
}
- monitor_mk_pagetable(ed);
ed->arch.schedule_tail = arch_vmx_do_launch;
clear_bit(VMX_CPU_STATE_PG_ENABLED, &ed->arch.arch_vmx.cpu_state);
if (ed == ed->domain->exec_domain[0]) {
/*
* Required to do this once per domain
+ * XXX todo: add a separate function to do these.
*/
memset(&ed->domain->shared_info->evtchn_mask[0], 0xff,
sizeof(ed->domain->shared_info->evtchn_mask));
clear_bit(IOPACKET_PORT, &ed->domain->shared_info->evtchn_mask[0]);
+
+ /* Put the domain in shadow mode even though we're going to be using
+ * the shared 1:1 page table initially. It shouldn't hurt. */
+ shadow_mode_enable(ed->domain, SHM_full_32);
}
+ update_pagetables(ed); /* this assigns shadow_table */
+ alloc_monitor_pagetable(ed); /* this assigns monitor_table */
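+ /* From here on, ed->arch.shadow_table and ed->arch.monitor_table are
+ * both valid; monitor_table is what HOST_CR3 is loaded from when the
+ * VMCS host state is set up. */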
+
return 0;
out:
}
#endif
+
+/* Called when building a guest's initial context, and by do_boot_vcpu. */
int arch_final_setup_guest(
struct exec_domain *d, full_execution_context_t *c)
{
d->arch.failsafe_address = c->failsafe_callback_eip;
phys_basetab = c->pt_base;
- d->arch.guest_table = mk_pagetable(phys_basetab);
- d->arch.phys_table = d->arch.guest_table;
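+ /* For now the guest table and the 1:1 "phys" table are one and the
+ * same; VMX guests later move guest_table to the real guest CR3. */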
+ d->arch.guest_table = d->arch.phys_table = mk_pagetable(phys_basetab);
+
if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d->domain,
PGT_base_page_table) )
return -EINVAL;
return vmx_final_setup_guest(d, c);
#endif
+ update_pagetables(d); /* this assigns shadow_table
+ and monitor_table */
+
return 0;
}
{
n->arch.flags |= TF_kernel_mode;
__asm__ __volatile__ ( "swapgs" );
+ update_pagetables(n);
write_ptbase(n);
}
ed->arch.flags &= ~TF_kernel_mode;
__asm__ __volatile__ ( "swapgs" );
+ update_pagetables(ed);
write_ptbase(ed);
regs->rip = stu.rip;
free_vmcs(ed->arch.arch_vmx.vmcs);
ed->arch.arch_vmx.vmcs = 0;
- monitor_rm_pagetable(ed);
+ free_monitor_pagetable(ed);
rem_ac_timer(&(vpit->pit_timer));
}
#endif
void write_ptbase(struct exec_domain *ed)
{
- struct domain *d = ed->domain;
- unsigned long pa;
-
-#ifdef CONFIG_VMX
- if ( unlikely(shadow_mode(d)) )
- pa = ((shadow_mode(d) == SHM_full_32) ?
- pagetable_val(ed->arch.monitor_table) :
- pagetable_val(ed->arch.shadow_table));
- else
- pa = pagetable_val(ed->arch.guest_table);
-#else
- if ( unlikely(shadow_mode(d)) )
- pa = pagetable_val(ed->arch.shadow_table);
-#ifdef __x86_64__
- else if ( !(ed->arch.flags & TF_kernel_mode) )
- pa = pagetable_val(ed->arch.guest_table_user);
-#endif
- else
- pa = pagetable_val(ed->arch.guest_table);
-#endif
-
- write_cr3(pa);
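+ /* update_pagetables() keeps monitor_table pointing at whichever
+ * top-level table this VCPU should run on (guest, user, shadow, or
+ * monitor), so we can load it unconditionally. */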
+ write_cr3(pagetable_val(ed->arch.monitor_table));
}
static void __invalidate_shadow_ldt(struct exec_domain *d)
percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
old_base_pfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
ed->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
-
- shadow_mk_pagetable(ed);
+ update_pagetables(ed); /* update shadow_table and monitor_table */
write_ptbase(ed);
free_domheap_page(page);
}
-static void free_shadow_state(struct domain *d)
+void free_shadow_state(struct domain *d)
{
int i, free = 0;
struct shadow_status *x, *n;
{
}
-int shadow_mode_enable(struct domain *d, unsigned int mode)
+
+int __shadow_mode_enable(struct domain *d, unsigned int mode)
{
- d->arch.shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
- if ( d->arch.shadow_ht == NULL )
- goto nomem;
- memset(d->arch.shadow_ht, 0,
+ if ( d->arch.shadow_ht == NULL )
+ {
+ d->arch.shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
+ if ( d->arch.shadow_ht == NULL )
+ goto nomem;
+
+ memset(d->arch.shadow_ht, 0,
shadow_ht_buckets * sizeof(struct shadow_status));
+ }
- if ( mode == SHM_logdirty )
+ if ( (mode == SHM_logdirty) && !d->arch.shadow_dirty_bitmap )
{
d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
d->arch.shadow_dirty_bitmap =
d->arch.shadow_mode = mode;
- __shadow_mk_pagetable(d->exec_domain[0]); /* XXX SMP */
return 0;
nomem:
return -ENOMEM;
}
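+
+/* Locking wrapper: callers that already hold the shadow lock (e.g.
+ * shadow_mode_control()) use __shadow_mode_enable() directly. */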
+int shadow_mode_enable(struct domain *d, unsigned int mode)
+{
+ int rc;
+ shadow_lock(d);
+ rc = __shadow_mode_enable(d, mode);
+ shadow_unlock(d);
+ return rc;
+}
+
void __shadow_mode_disable(struct domain *d)
{
struct shadow_status *x, *n;
{
unsigned int op = sc->op;
int i, rc = 0;
+ struct exec_domain *ed;
ASSERT(spin_is_locked(&d->arch.shadow_lock));
SH_VLOG("shadow mode table op : page count %d", d->arch.shadow_page_count);
shadow_audit(d, 1);
- __shadow_mk_pagetable(d->exec_domain[0]); /* XXX SMP */
+
+ for_each_exec_domain(d, ed)
+ __update_pagetables(ed);
+
return rc;
}
{
unsigned int op = sc->op;
int rc = 0;
+ struct exec_domain *ed;
if ( unlikely(d == current->domain) )
{
case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
shadow_mode_disable(d);
- rc = shadow_mode_enable(d, SHM_test);
+ rc = __shadow_mode_enable(d, SHM_test);
break;
case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
shadow_mode_disable(d);
- rc = shadow_mode_enable(d, SHM_logdirty);
+ rc = __shadow_mode_enable(d, SHM_logdirty);
break;
default:
shadow_unlock(d);
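+
+ /* The shadow mode may just have changed; resync every VCPU's shadow
+ * and monitor tables before letting the domain run again. */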
+ for_each_exec_domain(d, ed)
+ update_pagetables(ed);
+
domain_unpause(d);
return rc;
#include <asm/vmx.h>
#include <asm/vmx_vmcs.h>
#include <asm/vmx_intercept.h>
+#include <asm/shadow.h>
#include <public/io/ioreq.h>
#ifdef CONFIG_VMX
domain_crash(); /* need to take a clean path */
}
old_base_pfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
+
+ /* We know that none of the previous 1:1 shadow pages are going to be
+ * used again, so we might as well flush them.
+ * XXX todo: wait until the last VCPU boots before doing the flush!
+ */
+ shadow_lock(d->domain);
+ free_shadow_state(d->domain); /* XXX SMP */
+ shadow_unlock(d->domain);
+
/*
* Now arch.guest_table points to machine physical.
*/
d->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
+ update_pagetables(d);
VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx\n",
(unsigned long) (pfn << PAGE_SHIFT));
- shadow_lock(d->domain);
- shadow_mode_enable(d->domain, SHM_full_32);
- shadow_unlock(d->domain);
-
__vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
/*
- * mm->shadow_table should hold the next CR3 for shadow
+ * arch.shadow_table should hold the next CR3 for shadow
*/
VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, pfn = %lx\n",
d->arch.arch_vmx.cpu_cr3, pfn);
+ /* Undo the get_page_and_type() done in the paravirt case. */
put_page_and_type(&frame_table[old_base_pfn]);
}
unsigned long pfn;
/*
- * If paging is not enabled yet, simply copy the valut to CR3.
+ * If paging is not enabled yet, simply copy the value to CR3.
*/
if (!test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
d->arch.arch_vmx.cpu_cr3 = value;
- return;
+ break;
}
guest_pl2e_cache_invalidate(d);
}
pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
vmx_shadow_clear_state(d->domain);
- d->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
- shadow_mk_pagetable(d);
+ d->arch.guest_table = mk_pagetable(pfn << PAGE_SHIFT);
+ update_pagetables(d);
/*
- * mm->shadow_table should hold the next CR3 for shadow
+ * arch.shadow_table should now hold the next CR3 for shadow
*/
d->arch.arch_vmx.cpu_cr3 = value;
VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx\n",
error |= __vmwrite(GUEST_TR_BASE, 0);
error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
- ed->arch.shadow_table = ed->arch.guest_table;
__vmwrite(GUEST_CR3, pagetable_val(ed->arch.guest_table));
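+ /* HOST_CR3 is loaded on every VM exit, so point it at the monitor
+ * table that the hypervisor runs on. */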
__vmwrite(HOST_CR3, pagetable_val(ed->arch.monitor_table));
__vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
d->shared_info->n_vcpu = smp_num_cpus;
+ /* Set up the shadow and monitor tables. */
+ update_pagetables(ed);
+
/* Install the new page tables. */
__cli();
write_ptbase(ed);
#ifndef NDEBUG
if (0) /* XXX DO NOT CHECK IN ENABLED !!! (useful for testing, so leave it here) */
{
- shadow_lock(d);
shadow_mode_enable(d, SHM_test);
- shadow_unlock(d);
+ update_pagetables(ed); /* XXX SMP */
}
#endif
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/******************************************************************************
* domain_build.c
*
d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
d->shared_info->n_vcpu = smp_num_cpus;
+ /* Set up the shadow and monitor tables. */
+ update_pagetables(ed);
+
/* Install the new page tables. */
__cli();
write_ptbase(ed);
atomic_set(&d->refcnt, 1);
atomic_set(&ed->pausecnt, 0);
- shadow_lock_init(d);
-
d->id = dom_id;
ed->processor = cpu;
d->create_time = NOW();
ed = d->exec_domain[vcpu];
atomic_set(&ed->pausecnt, 0);
- shadow_lock_init(d);
memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
#define IDLE0_ARCH_EXEC_DOMAIN \
{ \
perdomain_ptes: 0, \
- guest_table: mk_pagetable(__pa(idle_pg_table)) \
+ monitor_table: mk_pagetable(__pa(idle_pg_table)) \
}
#endif /* __ASM_DOMAIN_H__ */
extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde);
extern void unshadow_table(unsigned long gpfn, unsigned int type);
extern int shadow_mode_enable(struct domain *p, unsigned int mode);
+extern void free_shadow_state(struct domain *d);
#ifdef CONFIG_VMX
extern void vmx_shadow_clear_state(struct domain *);
#endif /* CONFIG_VMX */
-static inline void __shadow_mk_pagetable(struct exec_domain *ed)
+static inline void __update_pagetables(struct exec_domain *ed)
{
struct domain *d = ed->domain;
unsigned long gpfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
- SH_VVLOG("0: __shadow_mk_pagetable(gpfn=%p, smfn=%p)", gpfn, smfn);
+ SH_VVLOG("0: __update_pagetables(gpfn=%p, smfn=%p)", gpfn, smfn);
if ( unlikely(smfn == 0) )
smfn = shadow_l2_table(d, gpfn);
#ifdef CONFIG_VMX
else
if (d->arch.shadow_mode == SHM_full_32)
+ {
vmx_update_shadow_state(ed, gpfn, smfn);
+ }
#endif
ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
+
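+ /* Outside SHM_full_32 there is no separately allocated monitor table
+ * (see alloc_monitor_pagetable()); we run directly on the shadow
+ * table, so alias it here. */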
+ if (d->arch.shadow_mode != SHM_full_32)
+ ed->arch.monitor_table = ed->arch.shadow_table;
}
-static inline void shadow_mk_pagetable(struct exec_domain *ed)
+static inline void update_pagetables(struct exec_domain *ed)
{
if ( unlikely(shadow_mode(ed->domain)) )
{
- SH_VVLOG("shadow_mk_pagetable( gptbase=%p, mode=%d )",
+ SH_VVLOG("update_pagetables( gptbase=%p, mode=%d )",
pagetable_val(ed->arch.guest_table),
shadow_mode(ed->domain));
shadow_lock(ed->domain);
- __shadow_mk_pagetable(ed);
+ __update_pagetables(ed);
shadow_unlock(ed->domain);
- SH_VVLOG("leaving shadow_mk_pagetable:\n"
- "( gptbase=%p, mode=%d ) sh=%p",
- pagetable_val(ed->arch.guest_table),
- shadow_mode(ed->domain),
- pagetable_val(ed->arch.shadow_table) );
+ SH_VVLOG("leaving update_pagetables:\n"
+ "( gptbase=%p, mode=%d ) sh=%p",
+ pagetable_val(ed->arch.guest_table),
+ shadow_mode(ed->domain),
+ pagetable_val(ed->arch.shadow_table) );
}
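+ /* Not running shadowed: keep monitor_table pointing at the guest's
+ * own top-level table so write_ptbase() can load it blindly. */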
+ else
+#ifdef __x86_64__
+ if ( !(ed->arch.flags & TF_kernel_mode) )
+ ed->arch.monitor_table = ed->arch.guest_table_user;
+ else
+#endif
+ ed->arch.monitor_table = ed->arch.guest_table;
+
}
#if SHADOW_DEBUG